library(tidyverse)
library(AmesHousing)
library(recipes)
library(caret)
library(rpart)
library(rpart.plot)
library(ranger)
library(xgboost)
library(AUC)

Data prep

Árvore de decisão

# Inspect caret's built-in model definition for CART ("rpart"):
# the printed list below shows its tuning parameters ($parameters/$grid)
# and the fit/predict/prob hooks caret::train will call.
getModelInfo("rpart", FALSE)$rpart
$label
[1] "CART"

$library
[1] "rpart"

$type
[1] "Regression"     "Classification"

$parameters

$grid
function(x, y, len = NULL, search = "grid"){
                    dat <- if(is.data.frame(x)) x else as.data.frame(x)
                    dat$.outcome <- y
                    initialFit <- rpart::rpart(.outcome ~ .,
                                               data = dat,
                                               control = rpart::rpart.control(cp = 0))$cptable
                    initialFit <- initialFit[order(-initialFit[,"CP"]), , drop = FALSE]
                    if(search == "grid") {
                      if(nrow(initialFit) < len) {
                        tuneSeq <- data.frame(cp = seq(min(initialFit[, "CP"]),
                                                       max(initialFit[, "CP"]),
                                                       length = len))
                      } else tuneSeq <-  data.frame(cp = initialFit[1:len,"CP"])
                      colnames(tuneSeq) <- "cp"
                    } else {
                      tuneSeq <- data.frame(cp = unique(sample(initialFit[, "CP"], size = len, replace = TRUE)))
                    }

                    tuneSeq
                  }

$loop
function(grid) {
                    grid <- grid[order(grid$cp, decreasing = FALSE),, drop = FALSE]
                    loop <- grid[1,,drop = FALSE]
                    submodels <- list(grid[-1,,drop = FALSE])
                    list(loop = loop, submodels = submodels)
                  }

$fit
function(x, y, wts, param, lev, last, classProbs, ...) {
                    cpValue <- if(!last) param$cp else 0
                    theDots <- list(...)
                    if(any(names(theDots) == "control"))
                    {
                      theDots$control$cp <- cpValue
                      theDots$control$xval <- 0
                      ctl <- theDots$control
                      theDots$control <- NULL
                    } else ctl <- rpart::rpart.control(cp = cpValue, xval = 0)

                    ## check to see if weights were passed in (and availible)
                    if(!is.null(wts)) theDots$weights <- wts

                    modelArgs <- c(list(formula = as.formula(".outcome ~ ."),
                                        data = if(is.data.frame(x)) x else as.data.frame(x),
                                        control = ctl),
                                   theDots)
                    modelArgs$data$.outcome <- y

                    out <- do.call(rpart::rpart, modelArgs)

                    if(last) out <- rpart::prune.rpart(out, cp = param$cp)
                    out
                  }

$predict
function(modelFit, newdata, submodels = NULL) {
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)

                    pType <- if(modelFit$problemType == "Classification") "class" else "vector"
                    out  <- predict(modelFit, newdata, type=pType)

                    if(!is.null(submodels))
                    {
                      tmp <- vector(mode = "list", length = nrow(submodels) + 1)
                      tmp[[1]] <- out
                      for(j in seq(along = submodels$cp))
                      {
                        prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
                        tmp[[j+1]]  <- predict(prunedFit, newdata, type=pType)
                      }
                      out <- tmp
                    }
                    out
                  }

$prob
function(modelFit, newdata, submodels = NULL) {
                    if(!is.data.frame(newdata)) newdata <- as.data.frame(newdata)
                    out <- predict(modelFit, newdata, type = "prob")

                    if(!is.null(submodels))
                    {
                      tmp <- vector(mode = "list", length = nrow(submodels) + 1)
                      tmp[[1]] <- out
                      for(j in seq(along = submodels$cp))
                      {
                        prunedFit <- rpart::prune.rpart(modelFit, cp = submodels$cp[j])
                        tmpProb <- predict(prunedFit, newdata, type = "prob")
                        tmp[[j+1]] <- as.data.frame(tmpProb[, modelFit$obsLevels, drop = FALSE])
                      }
                      out <- tmp
                    }
                    out
                  }

$predictors
function(x, surrogate = TRUE, ...)  {
                    out <- as.character(x$frame$var)
                    out <- out[!(out %in% c("<leaf>"))]
                    if(surrogate)
                    {
                      splits <- x$splits
                      splits <- splits[splits[,"adj"] > 0,]
                      out <- c(out, rownames(splits))
                    }
                    unique(out)
                  }

$varImp
function(object, surrogates = FALSE, competes = TRUE, ...) {
                    if(nrow(object$splits)>0) {
                      tmp <- rownames(object$splits)
                      rownames(object$splits) <- 1:nrow(object$splits)
                      splits <- data.frame(object$splits)
                      splits$var <- tmp
                      splits$type <- ""

                      frame <- as.data.frame(object$frame)
                      index <- 0
                      for(i in 1:nrow(frame)) {
                        if(frame$var[i] != "<leaf>") {
                          index <- index + 1
                          splits$type[index] <- "primary"
                          if(frame$ncompete[i] > 0) {
                            for(j in 1:frame$ncompete[i]) {
                              index <- index + 1
                              splits$type[index] <- "competing"
                            }
                          }
                          if(frame$nsurrogate[i] > 0) {
                            for(j in 1:frame$nsurrogate[i]) {
                              index <- index + 1
                              splits$type[index] <- "surrogate"
                            }
                          }
                        }
                      }
                      splits$var <- factor(as.character(splits$var))
                      if(!surrogates) splits <- subset(splits, type != "surrogate")
                      if(!competes) splits <- subset(splits, type != "competing")
                      out <- aggregate(splits$improve,
                                       list(Variable = splits$var),
                                       sum,
                                       na.rm = TRUE)
                    } else {
              out <- data.frame(x = numeric(), Vaiable = character())
            }
                    allVars <- colnames(attributes(object$terms)$factors)
                    if(!all(allVars %in% out$Variable)) {
                      missingVars <- allVars[!(allVars %in% out$Variable)]
                      zeros <- data.frame(x = rep(0, length(missingVars)),
                                          Variable = missingVars)
                      out <- rbind(out, zeros)
                    }
                    out2 <- data.frame(Overall = out$x)
                    rownames(out2) <- out$Variable
                    out2
                  }

$levels
function(x) x$obsLevels

$trim
function(x) {
                    x$call <- list(na.action = (x$call)$na.action)
                    x$x <- NULL
                    x$y <- NULL
                    x$where <- NULL
                    x
                  }

$tags
[1] "Tree-Based Model"              "Implicit Feature Selection"   
[3] "Handle Missing Predictor Data" "Accepts Case Weights"         

$sort
function(x) x[order(x[,1], decreasing = TRUE),]
# Fit a CART decision tree with caret, preprocessing via the `receita`
# recipe, tuning the complexity parameter (cp) over `grid_rpart`, and
# selecting the best cp by cross-validated ROC (see train_control_rpart).
modelo_rpart <- train(
  receita,
  select(filter(credit_data, base == "treino"), -base),
  method = "rpart",
  metric = "ROC",
  trControl = train_control_rpart,
  tuneGrid = grid_rpart
)
Preparing recipe
+ Fold1: cp=-0.001 
- Fold1: cp=-0.001 
+ Fold2: cp=-0.001 
- Fold2: cp=-0.001 
+ Fold3: cp=-0.001 
- Fold3: cp=-0.001 
+ Fold4: cp=-0.001 
- Fold4: cp=-0.001 
+ Fold5: cp=-0.001 
- Fold5: cp=-0.001 
Aggregating results
Selecting tuning parameters
Fitting cp = 2e-04 on full training set

Resultado

# Render the final pruned tree to a 20 x 10 inch PDF ("arvore.pdf").
pdf("arvore.pdf", 20, 10)
# Close the graphics device even if plotting fails; otherwise an error in
# rpart.plot would leave the PDF device open and the file truncated.
tryCatch(
  rpart.plot(modelo_rpart$finalModel),
  finally = dev.off()
)
null device 
          1 
# Confusion matrix of the tree's class predictions on the hold-out set.
# mode = "everything" also reports precision, recall, F1, prevalence, etc.
caret::confusionMatrix(
  data = predict(modelo_rpart, credit_data_teste),
  reference = credit_data_teste$Status,
  mode = "everything"
)
Confusion Matrix and Statistics

          Reference
Prediction bad good
      bad  191  145
      good 195  853
                                          
               Accuracy : 0.7543          
                 95% CI : (0.7308, 0.7768)
    No Information Rate : 0.7211          
    P-Value [Acc > NIR] : 0.002910        
                                          
                  Kappa : 0.364           
                                          
 Mcnemar's Test P-Value : 0.007875        
                                          
            Sensitivity : 0.4948          
            Specificity : 0.8547          
         Pos Pred Value : 0.5685          
         Neg Pred Value : 0.8139          
              Precision : 0.5685          
                 Recall : 0.4948          
                     F1 : 0.5291          
             Prevalence : 0.2789          
         Detection Rate : 0.1380          
   Detection Prevalence : 0.2428          
      Balanced Accuracy : 0.6748          
                                          
       'Positive' Class : bad             
                                          

Random Forest

# Inspect the tuning-grid generator of the "ranger" model definition.
# NOTE(review): `infos` is presumably getModelInfo("ranger", FALSE)$ranger,
# created in an earlier (not shown) chunk — confirm against the notebook.
infos$grid
function(x, y, len = NULL, search = "grid") {
                    if(search == "grid") {
                      srule <-
                        if (is.factor(y))
                          "gini"
                      else
                        "variance"
                      out <- expand.grid(mtry = 
                                          caret::var_seq(p = ncol(x),
                                                         classification = is.factor(y),
                                                         len = len),
                                         min.node.size = ifelse( is.factor(y), 1, 5), 
                                        splitrule = c(srule, "extratrees"))
                    } else {
                      srules <- if (is.factor(y))
                        c("gini", "extratrees")
                      else
                        c("variance", "extratrees", "maxstat")
                      out <-
                        data.frame(
                          min.node.size= sample(1:(min(20,nrow(x))), size = len, replace = TRUE), 
                          mtry = sample(1:ncol(x), size = len, replace = TRUE),
                          splitrule = sample(srules, size = len, replace = TRUE)
                        )
                    }
                    out
                  }
# Fit a random forest via ranger, tuning mtry / min.node.size / splitrule
# with tuneLength = 5 candidates per parameter, selected by CV ROC.
modelo_rf <- train(
  receita,
  # Scalar filter written with `==` for consistency with the rpart fit
  # (the original used `%in%`, which behaves the same here but reads as a
  # set-membership test).
  credit_data %>% filter(base == "treino") %>% select(-base),
  method = "ranger",
  importance = "permutation",  # permutation-based variable importance
  metric = "ROC",
  trControl = train_control_rf,
  tuneLength = 5
)
Preparing recipe
+ Fold1: mtry= 2, min.node.size=1, splitrule=gini 
- Fold1: mtry= 2, min.node.size=1, splitrule=gini 
+ Fold1: mtry= 6, min.node.size=1, splitrule=gini 
- Fold1: mtry= 6, min.node.size=1, splitrule=gini 
+ Fold1: mtry=10, min.node.size=1, splitrule=gini 
- Fold1: mtry=10, min.node.size=1, splitrule=gini 
+ Fold1: mtry=14, min.node.size=1, splitrule=gini 
- Fold1: mtry=14, min.node.size=1, splitrule=gini 
+ Fold1: mtry=18, min.node.size=1, splitrule=gini 
- Fold1: mtry=18, min.node.size=1, splitrule=gini 
+ Fold1: mtry= 2, min.node.size=1, splitrule=extratrees 
- Fold1: mtry= 2, min.node.size=1, splitrule=extratrees 
+ Fold1: mtry= 6, min.node.size=1, splitrule=extratrees 
- Fold1: mtry= 6, min.node.size=1, splitrule=extratrees 
+ Fold1: mtry=10, min.node.size=1, splitrule=extratrees 
- Fold1: mtry=10, min.node.size=1, splitrule=extratrees 
+ Fold1: mtry=14, min.node.size=1, splitrule=extratrees 
- Fold1: mtry=14, min.node.size=1, splitrule=extratrees 
+ Fold1: mtry=18, min.node.size=1, splitrule=extratrees 
- Fold1: mtry=18, min.node.size=1, splitrule=extratrees 
+ Fold2: mtry= 2, min.node.size=1, splitrule=gini 
- Fold2: mtry= 2, min.node.size=1, splitrule=gini 
+ Fold2: mtry= 6, min.node.size=1, splitrule=gini 
- Fold2: mtry= 6, min.node.size=1, splitrule=gini 
+ Fold2: mtry=10, min.node.size=1, splitrule=gini 
- Fold2: mtry=10, min.node.size=1, splitrule=gini 
+ Fold2: mtry=14, min.node.size=1, splitrule=gini 
- Fold2: mtry=14, min.node.size=1, splitrule=gini 
+ Fold2: mtry=18, min.node.size=1, splitrule=gini 
- Fold2: mtry=18, min.node.size=1, splitrule=gini 
+ Fold2: mtry= 2, min.node.size=1, splitrule=extratrees 
- Fold2: mtry= 2, min.node.size=1, splitrule=extratrees 
+ Fold2: mtry= 6, min.node.size=1, splitrule=extratrees 
- Fold2: mtry= 6, min.node.size=1, splitrule=extratrees 
+ Fold2: mtry=10, min.node.size=1, splitrule=extratrees 
- Fold2: mtry=10, min.node.size=1, splitrule=extratrees 
+ Fold2: mtry=14, min.node.size=1, splitrule=extratrees 
- Fold2: mtry=14, min.node.size=1, splitrule=extratrees 
+ Fold2: mtry=18, min.node.size=1, splitrule=extratrees 
- Fold2: mtry=18, min.node.size=1, splitrule=extratrees 
+ Fold3: mtry= 2, min.node.size=1, splitrule=gini 
- Fold3: mtry= 2, min.node.size=1, splitrule=gini 
+ Fold3: mtry= 6, min.node.size=1, splitrule=gini 
- Fold3: mtry= 6, min.node.size=1, splitrule=gini 
+ Fold3: mtry=10, min.node.size=1, splitrule=gini 
- Fold3: mtry=10, min.node.size=1, splitrule=gini 
+ Fold3: mtry=14, min.node.size=1, splitrule=gini 
- Fold3: mtry=14, min.node.size=1, splitrule=gini 
+ Fold3: mtry=18, min.node.size=1, splitrule=gini 
- Fold3: mtry=18, min.node.size=1, splitrule=gini 
+ Fold3: mtry= 2, min.node.size=1, splitrule=extratrees 
- Fold3: mtry= 2, min.node.size=1, splitrule=extratrees 
+ Fold3: mtry= 6, min.node.size=1, splitrule=extratrees 
- Fold3: mtry= 6, min.node.size=1, splitrule=extratrees 
+ Fold3: mtry=10, min.node.size=1, splitrule=extratrees 
- Fold3: mtry=10, min.node.size=1, splitrule=extratrees 
+ Fold3: mtry=14, min.node.size=1, splitrule=extratrees 
- Fold3: mtry=14, min.node.size=1, splitrule=extratrees 
+ Fold3: mtry=18, min.node.size=1, splitrule=extratrees 
- Fold3: mtry=18, min.node.size=1, splitrule=extratrees 
+ Fold4: mtry= 2, min.node.size=1, splitrule=gini 
- Fold4: mtry= 2, min.node.size=1, splitrule=gini 
+ Fold4: mtry= 6, min.node.size=1, splitrule=gini 
- Fold4: mtry= 6, min.node.size=1, splitrule=gini 
+ Fold4: mtry=10, min.node.size=1, splitrule=gini 
- Fold4: mtry=10, min.node.size=1, splitrule=gini 
+ Fold4: mtry=14, min.node.size=1, splitrule=gini 
- Fold4: mtry=14, min.node.size=1, splitrule=gini 
+ Fold4: mtry=18, min.node.size=1, splitrule=gini 
- Fold4: mtry=18, min.node.size=1, splitrule=gini 
+ Fold4: mtry= 2, min.node.size=1, splitrule=extratrees 
- Fold4: mtry= 2, min.node.size=1, splitrule=extratrees 
+ Fold4: mtry= 6, min.node.size=1, splitrule=extratrees 
- Fold4: mtry= 6, min.node.size=1, splitrule=extratrees 
+ Fold4: mtry=10, min.node.size=1, splitrule=extratrees 
- Fold4: mtry=10, min.node.size=1, splitrule=extratrees 
+ Fold4: mtry=14, min.node.size=1, splitrule=extratrees 
- Fold4: mtry=14, min.node.size=1, splitrule=extratrees 
+ Fold4: mtry=18, min.node.size=1, splitrule=extratrees 
- Fold4: mtry=18, min.node.size=1, splitrule=extratrees 
+ Fold5: mtry= 2, min.node.size=1, splitrule=gini 
- Fold5: mtry= 2, min.node.size=1, splitrule=gini 
+ Fold5: mtry= 6, min.node.size=1, splitrule=gini 
- Fold5: mtry= 6, min.node.size=1, splitrule=gini 
+ Fold5: mtry=10, min.node.size=1, splitrule=gini 
- Fold5: mtry=10, min.node.size=1, splitrule=gini 
+ Fold5: mtry=14, min.node.size=1, splitrule=gini 
- Fold5: mtry=14, min.node.size=1, splitrule=gini 
+ Fold5: mtry=18, min.node.size=1, splitrule=gini 
- Fold5: mtry=18, min.node.size=1, splitrule=gini 
+ Fold5: mtry= 2, min.node.size=1, splitrule=extratrees 
- Fold5: mtry= 2, min.node.size=1, splitrule=extratrees 
+ Fold5: mtry= 6, min.node.size=1, splitrule=extratrees 
- Fold5: mtry= 6, min.node.size=1, splitrule=extratrees 
+ Fold5: mtry=10, min.node.size=1, splitrule=extratrees 
- Fold5: mtry=10, min.node.size=1, splitrule=extratrees 
+ Fold5: mtry=14, min.node.size=1, splitrule=extratrees 
- Fold5: mtry=14, min.node.size=1, splitrule=extratrees 
+ Fold5: mtry=18, min.node.size=1, splitrule=extratrees 
- Fold5: mtry=18, min.node.size=1, splitrule=extratrees 
Aggregating results
Selecting tuning parameters
Fitting mtry = 2, splitrule = gini, min.node.size = 1 on full training set

Resultado

# Confusion matrix of the random forest's predictions on the hold-out set,
# with the full set of derived statistics (precision, recall, F1, ...).
caret::confusionMatrix(
  data = predict(modelo_rf, credit_data_teste),
  reference = credit_data_teste$Status,
  mode = "everything"
)
Confusion Matrix and Statistics

          Reference
Prediction bad good
      bad  146   41
      good 240  957
                                          
               Accuracy : 0.797           
                 95% CI : (0.7748, 0.8179)
    No Information Rate : 0.7211          
    P-Value [Acc > NIR] : 5.126e-11       
                                          
                  Kappa : 0.4005          
                                          
 Mcnemar's Test P-Value : < 2.2e-16       
                                          
            Sensitivity : 0.3782          
            Specificity : 0.9589          
         Pos Pred Value : 0.7807          
         Neg Pred Value : 0.7995          
              Precision : 0.7807          
                 Recall : 0.3782          
                     F1 : 0.5096          
             Prevalence : 0.2789          
         Detection Rate : 0.1055          
   Detection Prevalence : 0.1351          
      Balanced Accuracy : 0.6686          
                                          
       'Positive' Class : bad             
                                          

XGBoost

Exercício: Ajuste um xgboost usando o caret e responda: qual modelo apresenta a maior AUC? ctrl+C ctrl+V por sua conta!

DICA 1) troque “ranger” por “xgbTree” DICA 2) rode info <- getModelInfo("xgbTree", FALSE)$xgbTree e depois consulte info$parameters. DICA 3) experimente usar o parâmetro `tuneLength = 20` em vez do `tuneGrid`.

LS0tCnRpdGxlOiAiw4Fydm9yZSwgUmFuZG9tIEZvcmVzdCBlIFhHQm9vc3QiCm91dHB1dDogaHRtbF9ub3RlYm9vawotLS0KCmBgYHtyLCB3YXJuaW5nPUZBTFNFLCBtZXNzYWdlPUZBTFNFfQpsaWJyYXJ5KHRpZHl2ZXJzZSkKbGlicmFyeShBbWVzSG91c2luZykKbGlicmFyeShyZWNpcGVzKQpsaWJyYXJ5KGNhcmV0KQpsaWJyYXJ5KHJwYXJ0KQpsaWJyYXJ5KHJwYXJ0LnBsb3QpCmxpYnJhcnkocmFuZ2VyKQpsaWJyYXJ5KHhnYm9vc3QpCmxpYnJhcnkoQVVDKQpgYGAKCiMgRGF0YSBwcmVwCgpgYGB7cn0KZGF0YSgiY3JlZGl0X2RhdGEiKQoKc2V0LnNlZWQoNDIpCmNyZWRpdF9kYXRhIDwtIGNyZWRpdF9kYXRhICU+JQogIG11dGF0ZSgKICAgIGJhc2UgPSBpZl9lbHNlKHJ1bmlmKG5yb3coY3JlZGl0X2RhdGEpKSA8IDAuNywgInRyZWlubyIsICJ0ZXN0ZSIpCiAgKQoKcmVjZWl0YSA8LSByZWNpcGUoU3RhdHVzIH4gLiwgZGF0YSA9IGNyZWRpdF9kYXRhICU+JSBmaWx0ZXIoYmFzZSA9PSAidHJlaW5vIikgJT4lIHNlbGVjdCgtYmFzZSkpICU+JQogIHN0ZXBfbWVhbmltcHV0ZShhbGxfbnVtZXJpYygpLCAtYWxsX291dGNvbWVzKCkpICU+JQogIHN0ZXBfbW9kZWltcHV0ZShhbGxfbm9taW5hbCgpLCAtYWxsX291dGNvbWVzKCkpICU+JQogIHN0ZXBfZHVtbXkoYWxsX25vbWluYWwoKSwgLWFsbF9vdXRjb21lcygpKSAlPiUKICBzdGVwX2NvcnIoYWxsX3ByZWRpY3RvcnMoKSkgJT4lCiAgc3RlcF9uenYoYWxsX3ByZWRpY3RvcnMoKSkKYGBgCgoKIyDDgXJ2b3JlIGRlIGRlY2lzw6NvCgpgYGB7cn0KZ2V0TW9kZWxJbmZvKCJycGFydCIsIEZBTFNFKSRycGFydApgYGAKCmBgYHtyfQp0cmFpbl9jb250cm9sX3JwYXJ0IDwtIHRyYWluQ29udHJvbCgKICBtZXRob2QgPSAiY3YiLCAKICBudW1iZXIgPSA1LCAKICBjbGFzc1Byb2JzID0gVFJVRSwKICBzdW1tYXJ5RnVuY3Rpb24gPSB0d29DbGFzc1N1bW1hcnksCiAgdmVyYm9zZUl0ZXIgPSAxIAopCgojIERJQ0E6IHJvZGUKIyBpbmZvIDwtIGdldE1vZGVsSW5mbygicnBhcnQiLCBGQUxTRSkkcnBhcnQKIyBpbmZvJHBhcmFtZXRlcnMKCmdyaWRfcnBhcnQgPC0gZGF0YS5mcmFtZSgKICBjcCA9IHNlcSgtMC4wMDEsIDAuMDEsIGJ5PSAwLjAwMDEpCikKCm1vZGVsb19ycGFydCA8LSB0cmFpbigKICByZWNlaXRhLCAKICBjcmVkaXRfZGF0YSAlPiUgZmlsdGVyKGJhc2UgPT0gInRyZWlubyIpICU+JSBzZWxlY3QoLWJhc2UpLCAKICBtZXRob2QgPSAicnBhcnQiLCAKICBtZXRyaWMgPSAiUk9DIiwKICB0ckNvbnRyb2wgPSB0cmFpbl9jb250cm9sX3JwYXJ0LAogIHR1bmVHcmlkID0gZ3JpZF9ycGFydAopCmBgYAoKIyMgUmVzdWx0YWRvCgpgYGB7cn0KbW9kZWxvX3JwYXJ0Cm1vZGVsb19ycGFydCRiZXN0VHVuZQp2YXJJbXAobW9kZWxvX3JwYXJ0KQpwbG90KG1vZGVsb19ycGFydCkKYGBgCgpgYGB7cn0KIyBhcGVuYXMgcGFyYSBhcnZvcmVzCnJwYXJ0LnBsb3QobW9kZWxvX3JwYXJ0JGZpbmFsTW9k
ZWwpCnBkZigiYXJ2b3JlLnBkZiIsIDIwLCAxMCkKcnBhcnQucGxvdChtb2RlbG9fcnBhcnQkZmluYWxNb2RlbCkKZGV2Lm9mZigpCmBgYAoKCmBgYHtyfQojIE1hdHJpeiBkZSBjb25mdXPDo28KY3JlZGl0X2RhdGEgPC0gY3JlZGl0X2RhdGEgJT4lIAogIG11dGF0ZSgKICAgIHByZWRfcnBhcnQgPSBwcmVkaWN0KG1vZGVsb19ycGFydCwgLiwgdHlwZSA9ICJwcm9iIikkYmFkCiAgKQoKY3JlZGl0X2RhdGFfdGVzdGUgPC0gY3JlZGl0X2RhdGEgJT4lIGZpbHRlcihiYXNlICVpbiUgInRlc3RlIikKY2FyZXQ6OmNvbmZ1c2lvbk1hdHJpeCgKICBwcmVkaWN0KG1vZGVsb19ycGFydCwgY3JlZGl0X2RhdGFfdGVzdGUpLCAKICBjcmVkaXRfZGF0YV90ZXN0ZSRTdGF0dXMsIAogIG1vZGUgPSAiZXZlcnl0aGluZyIKKQpgYGAKCmBgYHtyfQojIEN1cnZhIFJPQwpjcmVkaXRfZGF0YV90ZXN0ZSA8LSBjcmVkaXRfZGF0YSAlPiUKICBmaWx0ZXIoYmFzZSAlaW4lICJ0ZXN0ZSIpICU+JQogIG11dGF0ZSgKICAgIFN0YXR1c19wYXJhX3JvYyA9IGZhY3RvcihpZl9lbHNlKFN0YXR1cyA9PSAiZ29vZCIsIDAsIDEpKQogICkgCgpyb2NfdGVzdGUgPC0gcm9jKAogIGNyZWRpdF9kYXRhX3Rlc3RlJHByZWRfcnBhcnQsIAogIGNyZWRpdF9kYXRhX3Rlc3RlJFN0YXR1c19wYXJhX3JvYwopCmF1Yyhyb2NfdGVzdGUpCnBsb3Qocm9jX3Rlc3RlKQpgYGAKCgpgYGB7cn0KI2N1cnZhIFJPQyBleHRyYSAgLS0tLSBjdWlkYWRvOiBjw7NkaWdvcyBkZSBSIGF2YW7Dp2Fkb3MhCnJvY19ycGFydCA8LSBjcmVkaXRfZGF0YSAlPiUKICBtdXRhdGUoCiAgICBTdGF0dXNfcGFyYV9yb2MgPSBmYWN0b3IoaWZfZWxzZShTdGF0dXMgPT0gImdvb2QiLCAwLCAxKSkKICApICU+JQogIGdyb3VwX2J5KGJhc2UpICU+JQogIG5lc3QoKSAlPiUKICBtdXRhdGUoCiAgICByb2MgPSBtYXAoZGF0YSwgfiByb2MoLngkcHJlZF9ycGFydCwgLngkU3RhdHVzX3BhcmFfcm9jKSksCiAgICBhdWMgPSBtYXBfZGJsKHJvYywgYXVjKQogICkKcm9jX3JwYXJ0CmBgYAoKCmBgYHtyfQpyb2NfcnBhcnQkcm9jICU+JSB3YWxrKHBsb3QpCmBgYAoKYGBge3J9CiMgZ3LDoWZpY28gZXh0cmEgLS0tLSBjdWlkYWRvOiBjw7NkaWdvcyBkZSBSIGF2YW7Dp2Fkb3MhCnJvY19wbG90IDwtIHJvY19ycGFydCAlPiUKICBzZWxlY3QoYmFzZSwgcm9jLCBhdWMpICU+JQogIG11dGF0ZSgKICAgIHJvYyA9IG1hcChyb2MsIH57CiAgICAgIC54ICU+JSAKICAgICAgICB1bmNsYXNzICU+JSAKICAgICAgICBhcy5kYXRhLmZyYW1lCiAgICB9KQogICkgJT4lCiAgdW5uZXN0ICU+JQogIGdncGxvdChhZXMoeCA9IGZwciwgeSA9IHRwciwgY29sb3VyID0gYmFzZSwgbGFiZWwgPSBjdXRvZmZzKSkgKwogIGdlb21fbGluZSgpICsKICBnZW9tX2FibGluZShjb2xvdXIgPSAiZ3JleTUwIikgKwogIHRoZW1lX21pbmltYWwoKSArCiAgY29vcmRfZml4ZWQoKQoKcGxvdGx5OjpnZ3Bsb3RseShyb2NfcGxvdCkKYGBgCgoKIyBSYW5k
b20gRm9yZXN0IApgYGB7cn0KaW5mb3MgPC0gZ2V0TW9kZWxJbmZvKCJyYW5nZXIiLCBGQUxTRSkkcmFuZ2VyCgpgYGAKCmBgYHtyfQp0cmFpbl9jb250cm9sX3JmIDwtIHRyYWluQ29udHJvbCgKICBtZXRob2QgPSAiY3YiLAogIG51bWJlciA9IDUsCiAgY2xhc3NQcm9icyA9IFRSVUUsCiAgc3VtbWFyeUZ1bmN0aW9uID0gdHdvQ2xhc3NTdW1tYXJ5LAogIHZlcmJvc2VJdGVyID0gMQopCgojIERJQ0E6IHJvZGUKIyBpbmZvIDwtIGdldE1vZGVsSW5mbygicmFuZ2VyIiwgRkFMU0UpJHJhbmdlcgojIGluZm8kcGFyYW1ldGVycwojIAojIGdyaWRfcmYgPC0gZXhwYW5kLmdyaWQoCiMgICBtdHJ5ID0gYygyLCA0LCA2KSwgIyBQUkVFTkNIQSBBUVVJCiMgICBtaW4ubm9kZS5zaXplID0gc2VxKDEwLCAxMDAsIGJ5ID0gMjApLAojICAgc3BsaXRydWxlID0gImdpbmkiCiMgKQoKbW9kZWxvX3JmIDwtIHRyYWluKAogIHJlY2VpdGEsCiAgY3JlZGl0X2RhdGEgJT4lIGZpbHRlcihiYXNlICVpbiUgInRyZWlubyIpICU+JSBzZWxlY3QoLWJhc2UpLAogIG1ldGhvZCA9ICJyYW5nZXIiLCAjUFJFRU5DSEEgQVFVSQogIGltcG9ydGFuY2UgPSAicGVybXV0YXRpb24iLAogIG1ldHJpYyA9ICJST0MiLAogIHRyQ29udHJvbCA9IHRyYWluX2NvbnRyb2xfcmYsCiAgdHVuZUxlbmd0aCA9IDIwCikKCiMgc2F2ZShtb2RlbG9fcmYsICJtb2RlbG9fcmYuUkRhdGEiKQoKbG9hZCgibW9kZWxvX3JmLlJEYXRhIikKYGBgCgojIyBSZXN1bHRhZG8KYGBge3J9Cm1vZGVsb19yZgptb2RlbG9fcmYkYmVzdFR1bmUKdmFySW1wKG1vZGVsb19yZikKcGxvdChtb2RlbG9fcmYpCmBgYAoKYGBge3J9CiMgUHJlZGljb2VzCgpjcmVkaXRfZGF0YSA8LSBjcmVkaXRfZGF0YSAlPiUgCiAgbXV0YXRlKAogICAgcHJlZF9yZiA9IHByZWRpY3QobW9kZWxvX3JmLCAuLCB0eXBlID0gInByb2IiKSRiYWQKICApCmBgYAoKCmBgYHtyfQojIE1hdHJpeiBkZSBjb25mdXPDo28KY3JlZGl0X2RhdGFfdGVzdGUgPC0gY3JlZGl0X2RhdGEgJT4lIGZpbHRlcihiYXNlICVpbiUgInRlc3RlIikKY2FyZXQ6OmNvbmZ1c2lvbk1hdHJpeChwcmVkaWN0KG1vZGVsb19yZiwgY3JlZGl0X2RhdGFfdGVzdGUpLCBjcmVkaXRfZGF0YV90ZXN0ZSRTdGF0dXMsIG1vZGUgPSAiZXZlcnl0aGluZyIpCmBgYAoKYGBge3J9CiNjdXJ2YSBST0MgIC0tLS0gY3VpZGFkbzogY8OzZGlnb3MgZGUgUiBhdmFuw6dhZG9zIQpyb2NzIDwtIGNyZWRpdF9kYXRhICU+JQogIG11dGF0ZSgKICAgIFN0YXR1c19wYXJhX3JvYyA9IGZhY3RvcihpZl9lbHNlKFN0YXR1cyA9PSAiZ29vZCIsIDAsIDEpKQogICkgJT4lCiAgc2VsZWN0KGJhc2UsIFN0YXR1c19wYXJhX3JvYywgc3RhcnRzX3dpdGgoInByZWQiKSkgJT4lCiAgZ2F0aGVyKG1vZGVsbywgdmFsb3JfcHJlZGl0bywgc3RhcnRzX3dpdGgoInByZWQiKSkgJT4lCiAgZ3JvdXBfYnkoYmFzZSwgbW9kZWxvKSAlPiUKICBuZXN0KCkgJT4lCiAgbXV0YXRlKAogICAgcm9jID0gbWFw
KGRhdGEsIH4gcm9jKC54JHZhbG9yX3ByZWRpdG8sIC54JFN0YXR1c19wYXJhX3JvYykpLAogICAgYXVjID0gbWFwX2RibChyb2MsIGF1YykKICApCgpyb2NzCmBgYAoKYGBge3J9CiMgQ29tcGFyYWNhbyBkZSBtb2RlbG9zCnJvY3MgJT4lCiAgZ2dwbG90KGFlcyh4ID0gYXVjLCB5ID0gbW9kZWxvLCBjb2xvdXIgPSBiYXNlKSkgKwogIGdlb21fcG9pbnQoc2l6ZSA9IDUpICsKICB0aGVtZV9taW5pbWFsKDMwKQpgYGAKCgpgYGB7cn0KIyBncsOhZmljbyBleHRyYSAtLS0tIGN1aWRhZG86IGPDs2RpZ29zIGRlIFIgYXZhbsOnYWRvcyEKcm9jX3Bsb3QgPC0gcm9jcyAlPiUKICBzZWxlY3QoYmFzZSwgbW9kZWxvLCByb2MpICU+JQogIG11dGF0ZSgKICAgIHJvYyA9IG1hcChyb2MsIH57CiAgICAgIC54ICU+JSAKICAgICAgICB1bmNsYXNzICU+JSAKICAgICAgICBhcy5kYXRhLmZyYW1lCiAgICB9KQogICkgJT4lCiAgdW5uZXN0ICU+JQogIGdncGxvdChhZXMoeCA9IGZwciwgeSA9IHRwciwgY29sb3VyID0gbW9kZWxvLCBsYWJlbCA9IGN1dG9mZnMpKSArCiAgZ2VvbV9saW5lKCkgKwogIGdlb21fYWJsaW5lKGNvbG91ciA9ICJncmV5NTAiKSArCiAgdGhlbWVfbWluaW1hbCgpICsKICBjb29yZF9maXhlZCgpICsKICBmYWNldF93cmFwKH5iYXNlKQoKcGxvdGx5OjpnZ3Bsb3RseShyb2NfcGxvdCkKYGBgCgoKCgoKIyBYR0Jvb3N0CgpFeGVyY8OtY2lvOiBBanVzdGUgdW0geGdib29zdCB1c2FuZG8gbyBjYXJldCBlIHJlc3BvbmRhOiBxdWFsIG1vZGVsbyBhcHJlc2VudGEgYSBtYWlvciBBVUM/IGNydGwrQyBjdHJsK1YgcG9yIHN1YSBjb250YSEKCkRJQ0EgMSkgdHJvcXVlICJyYW5nZXIiIHBvciAieGdiVHJlZSIKRElDQSAyKSByb2RlIGBpbmZvIDwtIGdldE1vZGVsSW5mbygieGdiVHJlZSIsIEZBTFNFKSR4Z2JUcmVlYCBlIGRlcG9pcyBjb25zdWx0ZSBgaW5mbyRwYXJhbWV0ZXJzYC4KRElDQSAzKSBleHBlcmltZW50ZSB1c2FyIG8gcGFyw6JtZXRybyBgdHVuZUxlbmd0aCA9IDIwYCBlbSB2ZXogZG8gYGB0dW5lR3JpZGAu